Chapter 6 비지도 학습
1) 군집 알고리즘
비지도 학습-타깃이 없을 때 사용하는 머신러닝 알고리즘
In [50]:
# Download the fruit image dataset (notebook shell command)
!wget https://bit.ly/fruits_300_data -O fruits_300.npy
--2024-11-24 09:01:55-- https://bit.ly/fruits_300_data Resolving bit.ly (bit.ly)... 67.199.248.10, 67.199.248.11 Connecting to bit.ly (bit.ly)|67.199.248.10|:443... connected. HTTP request sent, awaiting response... 301 Moved Permanently Location: https://github.com/rickiepark/hg-mldl/raw/master/fruits_300.npy [following] --2024-11-24 09:01:55-- https://github.com/rickiepark/hg-mldl/raw/master/fruits_300.npy Resolving github.com (github.com)... 140.82.114.4 Connecting to github.com (github.com)|140.82.114.4|:443... connected. HTTP request sent, awaiting response... 302 Found Location: https://raw.githubusercontent.com/rickiepark/hg-mldl/master/fruits_300.npy [following] --2024-11-24 09:01:56-- https://raw.githubusercontent.com/rickiepark/hg-mldl/master/fruits_300.npy Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ... Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 3000128 (2.9M) [application/octet-stream] Saving to: ‘fruits_300.npy’ fruits_300.npy 100%[===================>] 2.86M --.-KB/s in 0.01s 2024-11-24 09:01:56 (241 MB/s) - ‘fruits_300.npy’ saved [3000128/3000128]
In [51]:
# Load the data
import numpy as np
import matplotlib.pyplot as plt
fruits = np.load('fruits_300.npy')
print(fruits.shape) # (number of samples, image height, image width)
(300, 100, 100)
In [52]:
# First row of pixels of the first image
print(fruits[0, 0, :]) # grayscale values in the range 0~255
[ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 1 2 2 2 2 2 2 1 1 1 1 1 1 1 1 2 3 2 1 2 1 1 1 1 2 1 3 2 1 3 1 4 1 2 5 5 5 19 148 192 117 28 1 1 2 1 4 1 1 3 1 1 1 1 1 2 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
In [53]:
# Display the first image
plt.imshow(fruits[0], cmap='gray')
plt.show()
In [54]:
# Invert black and white (reversed gray colormap)
plt.imshow(fruits[0], cmap='gray_r')
plt.show()
In [55]:
# Pineapple
fig, axs = plt.subplots(1, 2) # (rows, cols) of subplots
axs[0].imshow(fruits[100], cmap='gray_r')
# Banana
axs[1].imshow(fruits[200], cmap='gray_r')
plt.show()
In [56]:
# Flatten each 100x100 image into a 10,000-element row vector, per class
apple = fruits[0:100].reshape(-1, 100*100)
pineapple = fruits[100:200].reshape(-1, 100*100)
banana = fruits[200:300].reshape(-1, 100*100)
print(apple.shape)
(100, 10000)
In [57]:
# Mean pixel value of each apple sample (axis=1: across pixels, per image)
print(apple.mean(axis=1))
[ 88.3346 97.9249 87.3709 98.3703 92.8705 82.6439 94.4244 95.5999 90.681 81.6226 87.0578 95.0745 93.8416 87.017 97.5078 87.2019 88.9827 100.9158 92.7823 100.9184 104.9854 88.674 99.5643 97.2495 94.1179 92.1935 95.1671 93.3322 102.8967 94.6695 90.5285 89.0744 97.7641 97.2938 100.7564 90.5236 100.2542 85.8452 96.4615 97.1492 90.711 102.3193 87.1629 89.8751 86.7327 86.3991 95.2865 89.1709 96.8163 91.6604 96.1065 99.6829 94.9718 87.4812 89.2596 89.5268 93.799 97.3983 87.151 97.825 103.22 94.4239 83.6657 83.5159 102.8453 87.0379 91.2742 100.4848 93.8388 90.8568 97.4616 97.5022 82.446 87.1789 96.9206 90.3135 90.565 97.6538 98.0919 93.6252 87.3867 84.7073 89.1135 86.7646 88.7301 86.643 96.7323 97.2604 81.9424 87.1687 97.2066 83.4712 95.9781 91.8096 98.4086 100.7823 101.556 100.7027 91.6098 88.8976]
In [58]:
# Histogram of per-sample mean pixel values for each fruit
plt.hist(np.mean(apple, axis=1), color='r', alpha=0.8)
plt.hist(np.mean(pineapple, axis=1), color='orange', alpha=0.8)
plt.hist(np.mean(banana, axis=1), color='y', alpha=0.8)
plt.legend(['apple', 'pineapple', 'banana'])
plt.show()
In [59]:
# Per-pixel mean across samples (axis=0) for each fruit class
fig, axs = plt.subplots(1, 3, figsize=(20,5))
axs[0].bar(range(10000), np.mean(apple, axis=0))
axs[1].bar(range(10000), np.mean(pineapple, axis=0))
axs[2].bar(range(10000), np.mean(banana, axis=0))
plt.show()
In [60]:
# Reshape the per-pixel means back into 100x100 "average images"
apple_mean = np.mean(apple, axis=0).reshape(100, 100)
pineapple_mean = np.mean(pineapple, axis=0).reshape(100, 100)
banana_mean = np.mean(banana, axis=0).reshape(100, 100)
fig, axs = plt.subplots(1, 3, figsize=(20,5))
axs[0].imshow(apple_mean, cmap='gray_r')
axs[1].imshow(pineapple_mean, cmap='gray_r')
axs[2].imshow(banana_mean, cmap='gray_r')
plt.show()
In [61]:
# Pick images close to the mean image (using mean absolute error)
abs_diff = np.abs(fruits - apple_mean)
abs_mean = np.mean(abs_diff, axis=(1,2)) # one error value per sample
print(abs_mean.shape)
(300,)
In [62]:
# 100 samples with the smallest mean absolute difference from apple_mean
apple_index = np.argsort(abs_mean)[:100]
fig, axs = plt.subplots(10, 10, figsize=(10,10))
for i in range(10):
    for j in range(10):
        axs[i, j].imshow(fruits[apple_index[i*10 + j]], cmap='gray_r')
        axs[i, j].axis('off')  # hide the axes
plt.show()
In [63]:
# The 100 samples closest to the banana mean image
abs_diff_b = np.abs(fruits - banana_mean)
abs_mean_b = np.mean(abs_diff_b, axis=(1,2))
banana_index = np.argsort(abs_mean_b)[:100]
fig, axs = plt.subplots(10, 10, figsize=(10,10))
for i in range(10):
    for j in range(10):
        axs[i, j].imshow(fruits[banana_index[i*10 + j]], cmap='gray_r')
        axs[i, j].axis('off')
plt.show()
2) k-평균
-클러스터 중심/센트로이드
In [64]:
# Download the dataset again (notebook shell command)
!wget https://bit.ly/fruits_300_data -O fruits_300.npy
--2024-11-24 09:02:53-- https://bit.ly/fruits_300_data Resolving bit.ly (bit.ly)... 67.199.248.11, 67.199.248.10 Connecting to bit.ly (bit.ly)|67.199.248.11|:443... connected. HTTP request sent, awaiting response... 301 Moved Permanently Location: https://github.com/rickiepark/hg-mldl/raw/master/fruits_300.npy [following] --2024-11-24 09:02:53-- https://github.com/rickiepark/hg-mldl/raw/master/fruits_300.npy Resolving github.com (github.com)... 140.82.113.4 Connecting to github.com (github.com)|140.82.113.4|:443... connected. HTTP request sent, awaiting response... 302 Found Location: https://raw.githubusercontent.com/rickiepark/hg-mldl/master/fruits_300.npy [following] --2024-11-24 09:02:54-- https://raw.githubusercontent.com/rickiepark/hg-mldl/master/fruits_300.npy Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ... Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 3000128 (2.9M) [application/octet-stream] Saving to: ‘fruits_300.npy’ fruits_300.npy 100%[===================>] 2.86M --.-KB/s in 0.02s 2024-11-24 09:02:54 (168 MB/s) - ‘fruits_300.npy’ saved [3000128/3000128]
In [65]:
import numpy as np
fruits = np.load('fruits_300.npy')
fruits_2d = fruits.reshape(-1, 100*100) # flatten to (300, 10000)
from sklearn.cluster import KMeans
km = KMeans(n_clusters=3, random_state=42)
km.fit(fruits_2d)
print(km.labels_) # cluster label assigned to each sample
[2 2 2 2 2 0 2 2 2 2 2 2 2 2 2 2 2 2 0 2 2 2 2 2 2 2 0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 0 2 0 2 2 2 2 2 2 2 0 2 2 2 2 2 2 2 2 2 0 0 2 2 2 2 2 2 2 2 0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 0 2 2 2 2 2 2 2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
In [66]:
# Count how many samples fall into each cluster
print(np.unique(km.labels_, return_counts=True))
(array([0, 1, 2], dtype=int32), array([112, 98, 90]))
In [67]:
# Utility function for drawing a grid of fruit images
import matplotlib.pyplot as plt

def draw_fruits(arr, ratio=1):
    """Draw the images in `arr` in a grid with up to 10 columns.

    arr   : sequence of 2-D grayscale images
    ratio : scale factor for each figure cell
    """
    n = len(arr)
    rows = int(np.ceil(n/10))      # 10 images per row
    cols = n if rows < 2 else 10   # a single short row keeps its own width
    fig, axs = plt.subplots(rows, cols,
                            figsize=(cols*ratio, rows*ratio), squeeze=False)
    for i in range(rows):
        for j in range(cols):
            if i*10 + j < n:       # skip empty cells in the last row
                axs[i, j].imshow(arr[i*10 + j], cmap='gray_r')
            axs[i, j].axis('off')  # hide the axes
    plt.show()
In [68]:
# Images assigned to cluster 0
draw_fruits(fruits[km.labels_==0])
In [69]:
# Images assigned to cluster 1
draw_fruits(fruits[km.labels_==1])
In [70]:
# Images assigned to cluster 2
draw_fruits(fruits[km.labels_==2])
In [71]:
# Final cluster centers found by k-means, drawn as 100x100 images
draw_fruits(km.cluster_centers_.reshape(-1, 100, 100), ratio=3)
In [72]:
# Distance from a training sample to each cluster center
print(km.transform(fruits_2d[100:101]))
[[3400.24197319 8837.37750892 5279.33763699]]
In [73]:
# Predict the nearest cluster center for the sample
print(km.predict(fruits_2d[100:101]))
[0]
In [74]:
# Show the sample that was just predicted
draw_fruits(fruits[100:101])
In [75]:
# Number of iterations the algorithm ran before converging
print(km.n_iter_)
4
최적의 클러스터 개수 찾기
->엘보우
: 클러스터 개수 늘려가면서 이너셔(거리 제곱합) 변화 관찰
In [76]:
# Elbow method: plot inertia (sum of squared distances to the nearest
# cluster center) while increasing the number of clusters k
inertia = []
for k in range(2, 7):
    km = KMeans(n_clusters=k, random_state=42)
    km.fit(fruits_2d)
    inertia.append(km.inertia_)
plt.plot(range(2, 7), inertia)
plt.xlabel('k')
plt.ylabel('inertia')
plt.show()
# the curve bends ("elbow") at k=3
3) 주성분 분석
-차원 축소 알고리즘 PCA 모델
In [77]:
# Re-download and reload the dataset for the PCA section
!wget https://bit.ly/fruits_300_data -O fruits_300.npy
import numpy as np
fruits = np.load('fruits_300.npy')
fruits_2d = fruits.reshape(-1, 100*100) # (300, 10000)
--2024-11-24 09:03:04-- https://bit.ly/fruits_300_data Resolving bit.ly (bit.ly)... 67.199.248.11, 67.199.248.10 Connecting to bit.ly (bit.ly)|67.199.248.11|:443... connected. HTTP request sent, awaiting response... 301 Moved Permanently Location: https://github.com/rickiepark/hg-mldl/raw/master/fruits_300.npy [following] --2024-11-24 09:03:04-- https://github.com/rickiepark/hg-mldl/raw/master/fruits_300.npy Resolving github.com (github.com)... 140.82.112.3 Connecting to github.com (github.com)|140.82.112.3|:443... connected. HTTP request sent, awaiting response... 302 Found Location: https://raw.githubusercontent.com/rickiepark/hg-mldl/master/fruits_300.npy [following] --2024-11-24 09:03:04-- https://raw.githubusercontent.com/rickiepark/hg-mldl/master/fruits_300.npy Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ... Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 3000128 (2.9M) [application/octet-stream] Saving to: ‘fruits_300.npy’ fruits_300.npy 100%[===================>] 2.86M --.-KB/s in 0.01s 2024-11-24 09:03:04 (232 MB/s) - ‘fruits_300.npy’ saved [3000128/3000128]
In [78]:
from sklearn.decomposition import PCA
pca = PCA(n_components=50) # number of principal components to keep
pca.fit(fruits_2d)
print(pca.components_.shape) # (n components, n original features)
(50, 10000)
In [79]:
# Visualize each principal component as a 100x100 image
draw_fruits(pca.components_.reshape(-1, 100, 100))
In [80]:
print(fruits_2d.shape)
# Project the original data down to 50 dimensions
fruits_pca = pca.transform(fruits_2d)
print(fruits_pca.shape)
(300, 10000) (300, 50)
In [81]:
# Reconstruct an approximation of the original data from the components
fruits_inverse = pca.inverse_transform(fruits_pca)
print(fruits_inverse.shape)
(300, 10000)
In [82]:
# Draw the reconstructed images, 100 per fruit class
fruits_reconstruct = fruits_inverse.reshape(-1, 100, 100)
for start in [0, 100, 200]:
    draw_fruits(fruits_reconstruct[start:start+100])
    print("\n")
In [83]:
# Explained variance: how well the components represent the original variance
print(np.sum(pca.explained_variance_ratio_))
0.9215260436553633
In [84]:
plt.plot(pca.explained_variance_ratio_)
plt.show()
# the first ~10 principal components account for most of the variance
In [85]:
# Train a supervised model on the PCA-reduced data
# 1) Logistic regression on the original (10,000-feature) data
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
target = np.array([0]*100 + [1]*100 + [2]*100) # 100 samples per class
from sklearn.model_selection import cross_validate
scores = cross_validate(lr, fruits_2d, target)
print(np.mean(scores['test_score']))
print(np.mean(scores['fit_time']))
0.9966666666666667 0.9908132076263427
In [86]:
# 2) The same model on the 50-component fruits_pca (much faster to fit)
scores = cross_validate(lr, fruits_pca, target)
print(np.mean(scores['test_score']))
print(np.mean(scores['fit_time']))
0.9966666666666667 0.023668861389160155
In [87]:
# Pass the desired explained-variance ratio instead of a component count
pca = PCA(n_components=0.5)
pca.fit(fruits_2d)
print(pca.n_components_) # number of components actually chosen
2
In [88]:
# Transform the data with the reduced PCA (2 components per the output above)
fruits_pca = pca.transform(fruits_2d)
print(fruits_pca.shape)
(300, 2)
In [89]:
# Cross-validate logistic regression on the 2-component data
# NOTE(review): the recorded output shows ConvergenceWarning from lbfgs;
# raising max_iter on LogisticRegression would silence it — verify intent
scores = cross_validate(lr, fruits_pca, target)
print(np.mean(scores['test_score']))
print(np.mean(scores['fit_time']))
0.9933333333333334 0.029929971694946288
/usr/local/lib/python3.10/dist-packages/sklearn/linear_model/_logistic.py:469: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
n_iter_i = _check_optimize_result(
/usr/local/lib/python3.10/dist-packages/sklearn/linear_model/_logistic.py:469: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
n_iter_i = _check_optimize_result(
In [90]:
# Find clusters with the k-means algorithm on the PCA-reduced data
from sklearn.cluster import KMeans
km = KMeans(n_clusters=3, random_state=42)
km.fit(fruits_pca)
print(np.unique(km.labels_, return_counts=True))
(array([0, 1, 2], dtype=int32), array([110, 99, 91]))
In [91]:
# Draw the images belonging to each of the three clusters
for label in range(0, 3):
    draw_fruits(fruits[km.labels_==label])
    print("\n")
In [92]:
# Scatter plot of the first two principal components, colored by cluster
for label in range(0, 3):
    data = fruits_pca[km.labels_==label]
    plt.scatter(data[:, 0], data[:, 1])
plt.legend(['apple', 'banana', 'pineapple'])
plt.show()